return -EINVAL;
lowest_vcpu = NULL;
- if (viosapic_load.lowest_vcpu_id < MAX_VIRT_CPUS)
+ if (viosapic_load.lowest_vcpu_id < d->max_vcpus)
lowest_vcpu = d->vcpu[viosapic_load.lowest_vcpu_id];
else if (viosapic_load.lowest_vcpu_id != VIOSAPIC_INVALID_VCPU_ID)
return -EINVAL;
if (vcpu->vcpu_id == 0) {
vtm_offset = 0UL - ia64_get_itc();
- for (i = MAX_VIRT_CPUS - 1; i >= 0; i--) {
+ for (i = d->max_vcpus - 1; i >= 0; i--) {
if ((v = d->vcpu[i]) != NULL) {
VMX(v, vtm).vtm_offset = vtm_offset;
VMX(v, vtm).last_itc = 0;
vtm = &VMX(vcpu, vtm);
if (vcpu->vcpu_id == 0) {
vtm_offset = new_itc - ia64_get_itc();
- for (i = MAX_VIRT_CPUS - 1; i >= 0; i--) {
+ for (i = d->max_vcpus - 1; i >= 0; i--) {
if ((v = d->vcpu[i]) != NULL) {
VMX(v, vtm).vtm_offset = vtm_offset;
VMX(v, vtm).last_itc = 0;
int id = dest >> 8;
/* Fast look: assume EID=0 ID=vcpu_id. */
- if ((dest & 0xff) == 0 && id < MAX_VIRT_CPUS)
+ if ((dest & 0xff) == 0 && id < d->max_vcpus)
return d->vcpu[id];
return NULL;
}
int i;
vcpuid = hvm_load_instance(h);
- if (vcpuid > MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL) {
+ if (vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL) {
gdprintk(XENLOG_ERR,
"%s: domain has no vlsapic %u\n", __func__, vcpuid);
return -EINVAL;
vtime_t *vtm;
vcpuid = hvm_load_instance(h);
- if (vcpuid > MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL) {
+ if (vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL) {
gdprintk(XENLOG_ERR,
"%s: domain has no vtime %u\n", __func__, vcpuid);
return -EINVAL;
if (d->arch.is_sioemu) {
int i;
- for (i = 1; i < MAX_VIRT_CPUS; i++)
+ for (i = 1; i < XEN_LEGACY_MAX_VCPUS; i++)
d->shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
}
struct pt_regs *regs;
vcpuid = hvm_load_instance(h);
- if (vcpuid > MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL) {
+ if (vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL) {
gdprintk(XENLOG_ERR,
"%s: domain has no vcpu %u\n", __func__, vcpuid);
rc = -EINVAL;
int i;
vcpuid = hvm_load_instance(h);
- if (vcpuid > MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL) {
+ if (vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL) {
gdprintk(XENLOG_ERR,
"%s: domain has no vcpu %u\n", __func__, vcpuid);
rc = -EINVAL;
goto sendtrigger_out;
ret = -ESRCH;
- if ( (v = d->vcpu[op->u.sendtrigger.vcpu]) == NULL )
+ if ( op->u.sendtrigger.vcpu >= d->max_vcpus ||
+ (v = d->vcpu[op->u.sendtrigger.vcpu]) == NULL )
goto sendtrigger_out;
ret = 0;
if (!lsapic)
return -EINVAL;
- if (lsapic_nbr < MAX_VIRT_CPUS && dom0->vcpu[lsapic_nbr] != NULL)
+ if (lsapic_nbr < dom0->max_vcpus && dom0->vcpu[lsapic_nbr] != NULL)
enable = 1;
else
enable = 0;
/* Sanity! */
BUG_ON(d != dom0);
+ BUG_ON(d->vcpu == NULL);
BUG_ON(d->vcpu[0] == NULL);
BUG_ON(v->is_initialised);
// (we should be able to deal with this... later)
/* Mask all upcalls... */
- for ( i = 1; i < MAX_VIRT_CPUS; i++ )
+ for ( i = 1; i < XEN_LEGACY_MAX_VCPUS; i++ )
d->shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
printk ("Dom0 max_vcpus=%d\n", dom0_max_vcpus);
struct domain *d = current->domain;
/* Be sure the target exists. */
- if (cpu > MAX_VIRT_CPUS)
+ if (cpu >= d->max_vcpus)
return;
targ = d->vcpu[cpu];
if (targ == NULL)
{
int vcpu;
- for_each_vcpu_mask(vcpu, vcpu_dirty_mask) {
+ for_each_vcpu_mask(d, vcpu, vcpu_dirty_mask) {
struct vcpu* v = d->vcpu[vcpu];
if (!v->is_initialised)
continue;
}
if (HAS_PERVCPU_VHPT(d)) {
- for_each_vcpu_mask(vcpu, entry->vcpu_dirty_mask) {
+ for_each_vcpu_mask(d, vcpu, entry->vcpu_dirty_mask) {
v = d->vcpu[vcpu];
if (!v->is_initialised)
continue;
}
#if MAX_VIRT_CPUS > 1
-#define for_each_vcpu_mask(vcpu, mask)          \
+#define for_each_vcpu_mask(d, vcpu, mask)       \
    for ((vcpu) = first_vcpu(mask);             \
-        (vcpu) < MAX_VIRT_CPUS;                \
+        (vcpu) < (d)->max_vcpus;               \
         (vcpu) = next_vcpu((vcpu), (mask)))
#else /* NR_CPUS == 1 */
-#define for_each_vcpu_mask(vcpu, mask) for ((vcpu) = 0; (vcpu) < 1; (vcpu)++)
+#define for_each_vcpu_mask(d, vcpu, mask) for ((vcpu) = 0; (vcpu) < 1; (vcpu)++)
#endif /* NR_CPUS */
#define vcpumask_scnprintf(buf, len, src) \